Objective:

In this tutorial we will create a simple gravity problem from scratch using the SimPEG framework.

The relation between density and the gravity field is well known, thanks to the classic work of Newton in 1687. Since we generally only measure the vertical component of the field, this relationship can be written as: $$G(r)_z = \gamma \int_{V} \rho(r) \left(\frac{z - z_0}{{|\vec r - \vec r_0|}^3}\right) \; dV $$ where $\rho$ is the anomalous density and $\gamma$ is Newton's gravitational constant. This integral can be evaluated analytically for simple prisms, giving rise to a linear system of equations relating a discrete Earth to the observed data: $$ \mathbf{d}_z = \mathbf{G}_z \; \rho $$
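As a rough illustration of where the entries of $\mathbf{G}_z$ come from, the sketch below (plain numpy, not SimPEG code) approximates each cell by a point mass at its centre. SimPEG evaluates the exact analytic prism kernel, but the structure of the resulting linear system is the same.

import numpy as np

gamma = 6.674e-11  # Newton's gravitational constant (m^3 kg^-1 s^-2)

def gz_row(rx, cell_centers, cell_volumes):
    # Vertical gravity kernel of one observation point over all cells
    dr = cell_centers - rx                # r - r0 for every cell centre
    r3 = np.sum(dr**2, axis=1)**1.5       # |r - r0|^3
    return gamma * cell_volumes * dr[:, 2] / r3   # one row of G_z, shape (nC,)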


In [1]:
## Need to be on \pf\dev branch !!!
%pylab inline 
import SimPEG.PF as PF
from SimPEG import *
from SimPEG.Utils import io_utils


Populating the interactive namespace from numpy and matplotlib
Efficiency Warning: Interpolation will be slow, use setup.py!

            python setup.py build_ext --inplace
    

In [2]:
import matplotlib
matplotlib.rcParams['font.size'] = 14

In [49]:
# We first need to create a density model.
# Based on a set of parametric surfaces representing TKC, 
# we use VTK to discretize the 3-D space.

mshfile = 'MEsh_TEst.msh'

model_dir = '../../Geological_model/'

# Load mesh file
mesh = Mesh.TensorMesh.readUBC(model_dir+mshfile)

# Create our own mesh!
csx, csy, csz = 10., 10., 10.
ncx, ncy, ncz = 55, 55, 30
npad = 10
hx = [(csx,npad, -1.3),(csx,ncx),(csx,npad, 1.3)]
hy = [(csy,npad, -1.3),(csy,ncy),(csy,npad, 1.3)]
hz = [(csz,npad, -1.3),(csz,ncz), (10,20)]
mesh = Mesh.TensorMesh([hx, hy, hz],x0="CCN")
xc = 300+5.57e5
yc = 600+7.133e6
zc = 450.
x0_new = np.r_[mesh.x0[0]+xc, mesh.x0[1]+yc, mesh.x0[2]+zc]
mesh._x0 = x0_new

#Mesh.TensorMesh.writeUBC(mesh,model_dir+"Mesh_mag.msh")
# Define no-data-value
ndv = -100

# Define survey flight height
Z_bird = 2.

# Read in topo surface
topofile = model_dir+'TKCtopo.dat'
geosurf = [
    [model_dir+'Till.ts',True,True,0],
    [model_dir+'XVK.ts',True,True,1],
    [model_dir+'PK1.ts',True,True,2],
    [model_dir+'PK2.ts',True,True,3],
    [model_dir+'PK3.ts',True,True,4],
    [model_dir+'HK1.ts',True,True,5],
    [model_dir+'VK.ts',True,True,6]
]

In [50]:
import time as tm
import mpl_toolkits.mplot3d as a3
import matplotlib.colors as colors
import scipy as sp
# from mayavi import mlab
# mlab.close(all=True)
# fig = plt.figure(figsize=(8,6))


# mlab.figure(bgcolor=(1.,1.,1.))
modelInd = np.ones(mesh.nC)*ndv
for ii in range(len(geosurf)):
    tin = tm.time()
    print "Computing indices with VTK: " + geosurf[ii][0]
    T, S = io_utils.read_GOCAD_ts(geosurf[ii][0])
    indx = io_utils.surface2inds(T,S,mesh, boundaries=geosurf[ii][1], internal=geosurf[ii][2])
    print "VTK operation completed in " + str(tm.time() - tin) + " sec"
    modelInd[indx] = geosurf[ii][3]
    
#     S = S-1
#     mlab.triangular_mesh(T[:,0], T[:,1], T[:,2], S)
#     mlab.view(azimuth=-45.,distance=1000,elevation=75.)

# arr = mlab.screenshot()

# plt.imshow(arr)


Computing indices with VTK: ../../Geological_model/Till.ts
Extracting indices from grid...
VTK operation completed in 30.5199999809 sec
Computing indices with VTK: ../../Geological_model/XVK.ts
Extracting indices from grid...
VTK operation completed in 5.8599998951 sec
Computing indices with VTK: ../../Geological_model/PK1.ts
Extracting indices from grid...
VTK operation completed in 12.0950000286 sec
Computing indices with VTK: ../../Geological_model/PK2.ts
Extracting indices from grid...
VTK operation completed in 5.27600002289 sec
Computing indices with VTK: ../../Geological_model/PK3.ts
Extracting indices from grid...
VTK operation completed in 6.99499988556 sec
Computing indices with VTK: ../../Geological_model/HK1.ts
Extracting indices from grid...
VTK operation completed in 13.6840000153 sec
Computing indices with VTK: ../../Geological_model/VK.ts
Extracting indices from grid...
VTK operation completed in 15.2269999981 sec

In [51]:
# Load topography file in UBC format and find the active cells
#T, S = io_utils.read_GOCAD_ts(topsurf)
#indx = io_utils.surface2inds(T,S, mesh, boundaries=True, internal=True) 
#actv = np.zeros(mesh.nC)
#actv[indx] = 1

topo = np.genfromtxt(topofile,skip_header=1)
# Find the active cells
actv = Utils.surface2ind_topo(mesh, topo, gridLoc='N')
# actv = PF.Magnetics.getActiveTopo(mesh,topo,'N')

# Create active map to go from the reduced set to the full mesh
actvMap = Maps.InjectActiveCells(mesh, actv, ndv)

print "Active cells created from topography!"


Active cells created from topography!

In [52]:
# Build model

def getModel(Till=-1e-2, XVK=2e-3, PK1=-0.4, PK2=-0.3, PK3=-0.2, HK1=-0.1, VK=-0.15, bkgr=0.):
    vals = [Till, XVK, PK1, PK2, PK3, HK1, VK]
    model= np.ones(mesh.nC) * bkgr

    for ii, den in zip(range(7),vals):
        model[modelInd == ii] = den
    return model
model = getModel()
model = model[actv]

In [53]:
from ipywidgets.widgets import interact, IntSlider

In [54]:
# Here you can visualize the current model
m_true = actvMap*model
Mesh.TensorMesh.writeModelUBC(mesh,"Synthetic_Grav.den",m_true)
airc = m_true==ndv
m_true[airc] = np.nan
def slide(s,normal):
    colorbar(mesh.plotSlice(m_true, normal=normal, ind=s, clim=np.r_[model.min(), model.max()])[0])
    plt.gca().set_aspect('equal')
interact(slide, s=(0,60), normal=['X','Y','Z'])


Out[54]:
<function __main__.slide>

Forward system:

Now that we have all our spatial components, we can create our linear system. For a single location and a single component of the data, the kernel integral above is evaluated analytically over each prism cell; stacking one such row per observation location, the system ends up looking like:

$$ d^{obs} = \mathbf{F\; \rho}$$

where $\mathbf{F} \in \mathbb{R}^{nd \times nc}$ is our $forward$ operator.
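A hedged sketch of the assembly, again in plain numpy and assuming the hypothetical gz_row helper from the earlier sketch together with arrays rx_locs (nd x 3), cell_centers (nc x 3) and cell_volumes (nc,). In the cells below, SimPEG's PF.Gravity.GravityIntegral builds and stores this dense operator for us using the exact prism kernel.

F = np.vstack([gz_row(rx, cell_centers, cell_volumes) for rx in rx_locs])  # (nd, nc)
d_pred = F.dot(rho)   # d_obs = F * rho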


In [55]:
from scipy.interpolate import NearestNDInterpolator
# Inducing field parameters (not used by the gravity forward model; kept here
# from the companion magnetics problem). By the old convention, field orientation
# is given as an azimuth from North (positive clockwise) and a dip from the
# horizontal (positive downward).
# The field parameters at TKC are [H: 60,308 nT, I: 83.8 deg, D: 25.4 deg]
H0 = (60308.,83.8,25.4)

# We create a synthetic survey with observations at cell centers.
X, Y = np.meshgrid(mesh.vectorCCx[npad:-npad:2], mesh.vectorCCy[npad:-npad:2])

# Using our topography, we drape the survey over the surface and shift it up by the flight height
Ftopo = NearestNDInterpolator(topo[:,:2], topo[:,2])
Z = Ftopo(Utils.mkvc(X.T),Utils.mkvc(Y.T)) + Z_bird

rxLoc = np.c_[Utils.mkvc(X.T), Utils.mkvc(Y.T), Utils.mkvc(Z.T)]
rxLoc = PF.BaseGrav.RxObs(rxLoc)

srcField = PF.BaseGrav.SrcField([rxLoc])
survey = PF.BaseGrav.LinearSurvey(srcField)

In [56]:
# Now that we have a model and a survey we can build the linear system ...
nactv = np.int(np.sum(actv))

# Create reduced identity map
idenMap = Maps.IdentityMap(nP=nactv)

# Create the forward model operator
prob = PF.Gravity.GravityIntegral(mesh, mapping=idenMap, actInd=actv)

# Pair the survey and problem
survey.pair(prob)

In [57]:
# First time that we ask for predicted data,
# the dense matrix T is calculated.
# This is generally the bottleneck of the integral formulation, 
# in terms of time and memory.
d = prob.fields(model)

# Add noise to the data and assign uncertainties
data = d + randn(len(d))*0.01 # Add random Gaussian noise (0.01 mGal standard deviation)
wd = np.ones(len(data))*np.sqrt(0.01) # Assign flat uncertainties

survey.dobs = data
survey.std = wd

PF.Gravity.writeUBCobs('GRAV_Synthetic_data.obs',survey,data)

d2D = data.reshape(X.shape)
dat = plt.contourf(X-xc,Y-yc, d2D,40)    
plt.gca().set_aspect('equal')
plt.plot(X.flatten()-xc,Y.flatten()-yc,'k.', ms=2)
plt.colorbar(dat)    
plt.xlabel("Easting (m)")
plt.ylabel("Northing (m)")
plt.title("Gz (mGal)")
xlim(-500, 500)
ylim(-500, 500)


Begin calculation of forward operator: z
Done 0.0 %
Done 10.0 %
Done 20.0 %
Done 30.0 %
Done 40.0 %
Done 50.0 %
Done 60.0 %
Done 70.0 %
Done 80.0 %
Done 90.0 %
Done 100% ...forward operator completed!!

Observation file saved to: GRAV_Synthetic_data.obs
Out[57]:
(-500, 500)

In [58]:
# d2Dx, d2Dy, d2Dz = d[0:survey.nRx].reshape(X.shape), d[survey.nRx:2*survey.nRx].reshape(X.shape),  d[2*survey.nRx:].reshape(X.shape)


# plt.figure(figsize=[5,5])
# xc, yc = (X.min()+X.max())*0.5, (Y.min()+Y.max())*0.5
# #     colorbar(imshow(d2D,extent=[X.min()-xc, X.max()-xc, Y.min()-yc, Y.max()-yc],origin = 'lower'))
# #     plt.contour(X-xc,Y-yc, d2D,20,colors='k')
# # dat = plt.contourf(X-xc,Y-yc, d2Dz,40)

# # plt.gca().set_aspect('equal')


# mlab.figure(bgcolor=(1.,1.,1.))
# mlab.triangular_mesh(T[:,0], T[:,1], T[:,2], S)
# mlab.quiver3d(X,Y,Z,d2Dx, d2Dy, d2Dz, color)
# mlab.view(azimuth=-45.,distance=1500,elevation=75.)
# arr = mlab.screenshot()

# plt.imshow(arr)

Inverse problem

We have generated synthetic data; now we want to see if we can solve the inverse problem. Using the usual formulation, we seek a model that can reproduce the data, let's say through a least-squares measure of the form:

\begin{equation} \phi_d = \|\mathbf{W}_d \left( \mathbb{F}[\mathbf{m}] - \mathbf{d}^{obs} \right)\|_2^2 \end{equation}

The inverse problem is hard: we don't have great data coverage, the Earth is big, and there is usually noise in the data. So we need to add something to regularize it. The simplest way is to penalize solutions that don't make sense geologically, for example by assuming that the model is small. The usual smooth inversion uses an l2-norm measure:

\begin{equation} \phi_m = {\| \mathbf{W}_s \;( \mathbf{m - m^{ref}})\|}^2_2 + \sum_{i = x,y,z} {\| \mathbf{W}_i \; \mathbf{G}_i \; \mathbf{m}\|}^2_2 \;, \end{equation}

The full objective function to be minimized can be written as: \begin{equation} \phi(m) = \phi_d + \beta \phi_m\;, \end{equation} which will yield our usual small and smooth models.
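As a point of reference, the sketch below evaluates this smooth objective with plain numpy for a dense operator. The names (F, d_obs, wd, m_ref, Gx) are illustrative placeholders, not SimPEG objects; SimPEG assembles the same terms through its DataMisfit and Regularization classes.

def objective(m, beta):
    phi_d = np.sum(((F.dot(m) - d_obs) / wd)**2)               # data misfit ||Wd(Fm - d)||^2
    phi_m = np.sum((m - m_ref)**2) + np.sum((Gx.dot(m))**2)    # smallness + smoothness (unit weights)
    return phi_d + beta * phi_m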

We propose a fancier regularization function that allows us to recover sparse and blocky solutions. Starting with the well-known Ekblom norm: \begin{equation} \phi_m = \sum_{i=1}^{nc} {(x_i^2 + \epsilon^2)}^{p/2} \;, \end{equation} where $x_i$ denotes some function of the model parameter, and $\epsilon$ is a small value added to avoid a singularity as $x_i\rightarrow0$. For $p=2$, we get the usual least-squares measure and recover the regularization presented above. For $p \leq 1$, the function becomes non-linear, which requires some tweaking.

We can linearize the function by updating the penalty function iteratively, commonly known as the Iteratively Re-weighted Least-Squares (IRLS) method: \begin{equation} \phi_m^{(k)} = \frac{1}{2}\sum_{i=1}^{nc} r_i \; x_i^2 \end{equation} where the superscript $\square^{(k)}$ denotes the IRLS iteration. The weights $r(x)$ are computed from the model values obtained at the previous iteration such that: \begin{equation} {r}_i ={\Big( {({x_i}^{(k-1)})}^{2} + \epsilon^2 \Big)}^{p/2 - 1} \;, \end{equation} where ${r}(x) \in \mathbb{R}^{nc}$.

In matrix form, our objective function simply becomes: \begin{equation} \phi(m) = \|\mathbf{W}_d \left( \mathbb{F}[\mathbf{m}] - \mathbf{d}^{obs} \right)\|_2^2 + \beta \Big [ {\| \mathbf{W}_s \;\mathbf{R}_s\;( \mathbf{m - m^{ref}})\|}^2_2 + \sum_{i = x,y,z} {\| \mathbf{W}_i\; \mathbf{R}_i \; \mathbf{G}_i \; \mathbf{m}\|}^2_2 \Big ]\;, \end{equation} where the IRLS weights $\mathbf{R}_s$ and $\mathbf{R}_i$ are diagonal matrices defined as: \begin{equation} \begin{split} {R}_{s_{jj}} &= \sqrt{\eta_p}{\Big[ {({m_j}^{(k-1)})}^{2} + \epsilon_p^2 \Big]}^{(p/2 - 1)/2} \\ {R}_{i_{jj}} &= \sqrt{\eta_q}{\Big[ {\left ({{(G_i\;m^{(k-1)})}_j }\right)}^{2} + \epsilon_q^2 \Big]}^{(q/2 - 1)/2} \\ \eta_p &= {\epsilon_p}^{(1-p/2)} \\ \eta_q &= {\epsilon_q}^{(1-q/2)} \;, \end{split} \end{equation}

We added two scaling parameters $\eta_p$ and $\eta_q$ for reasons that we won't discuss here, but they turn out to be important for obtaining stable solves.

In order to initialize the IRLS and get an estimate for the stabilizing parameters $\epsilon_p$ and $\epsilon_q$, we first invert with the smooth $l_2$-norm. The whole IRLS process is implemented with a directive added to the inversion workflow (see below).
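To make the weight expressions concrete, here is a short numpy sketch of the smallness weights $\mathbf{R}_s$ following the formula above. The function name is illustrative only; in the cells below, SimPEG's Directives.Update_IRLS handles these updates internally.

def irls_smallness_weights(m_prev, p, eps_p):
    # Diagonal of R_s computed from the previous IRLS iterate m_prev
    eta_p = eps_p**(1. - p/2.)                                        # scaling factor
    return np.sqrt(eta_p) * (m_prev**2 + eps_p**2)**((p/2. - 1.)/2.)

# e.g. p = 0 with a small eps_p drives most cells toward zero, giving a blocky model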


In [59]:
# This is a potential-field problem, so we need weighting to push the inversion
# down; otherwise everything accumulates near the surface.
# Create sensitivity-based cell weights from our linear forward operator
# rxLoc = survey.srcField.rxList[0].locs
# wr = PF.Magnetics.get_dist_wgt(mesh, rxLoc, actv, 3., np.min(mesh.hx)/4.)
# wr = wr**2.
wr = np.sum(prob.G**2.,axis=0)**0.5
wr = ( wr/np.max(wr) )
    
reg = Regularization.Sparse(mesh, indActive = actv, mapping = idenMap)
reg.cell_weights = wr

dmis = DataMisfit.l2_DataMisfit(survey)
dmis.Wd = 1/wd

# Add directives to the inversion
opt = Optimization.ProjectedGNCG(maxIter=100 ,lower=-1.,upper=1., maxIterLS = 20, maxIterCG= 10, tolCG = 1e-3)
invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
betaest = Directives.BetaEstimate_ByEig()

# Here is where the norms are applied
eps = [1e-2,1e-2] # Threshold parameters (eps_p, eps_q) that stabilize the IRLS weights
norms = [0,1,1,1] # Norms applied to the model and its 3 gradients [p, qx, qy, qz]


IRLS = Directives.Update_IRLS( norms=norms,  eps=eps, f_min_change = 1e-1, minGNiter=6)
update_Jacobi = Directives.Update_lin_PreCond()
inv = Inversion.BaseInversion(invProb, directiveList=[IRLS,betaest,update_Jacobi])

m0 = np.ones(idenMap.nP)*1e-4

In [60]:
# Run inversion...
mrec = inv.run(m0)


SimPEG.InvProblem will set Regularization.mref to m0.
SimPEG.InvProblem is setting bfgsH0 to the inverse of the eval2Deriv.
                    ***Done using same Solver and solverOpts as the problem***
=============================== Projected GNCG ===============================
  #     beta     phi_d     phi_m       f      |proj(x-g)-x|  LS    Comment   
-----------------------------------------------------------------------------
   0  1.97e+05  1.10e+04  0.00e+00  1.10e+04    5.47e+02      0              
   1  9.84e+04  1.05e+04  1.21e-03  1.06e+04    5.29e+02      0              
   2  4.92e+04  9.85e+03  5.62e-03  1.01e+04    5.13e+02      0   Skip BFGS  
   3  2.46e+04  8.95e+03  1.91e-02  9.42e+03    5.01e+02      0   Skip BFGS  
   4  1.23e+04  7.56e+03  6.03e-02  8.30e+03    4.76e+02      0   Skip BFGS  
   5  6.15e+03  5.81e+03  1.62e-01  6.81e+03    4.24e+02      0   Skip BFGS  
   6  3.07e+03  4.12e+03  3.54e-01  5.21e+03    3.35e+02      0   Skip BFGS  
   7  1.54e+03  2.89e+03  6.26e-01  3.86e+03    2.34e+02      0   Skip BFGS  
   8  7.68e+02  2.05e+03  9.95e-01  2.82e+03    1.58e+02      0   Skip BFGS  
   9  3.84e+02  1.39e+03  1.59e+00  2.01e+03    1.21e+02      0   Skip BFGS  
  10  1.92e+02  8.30e+02  2.63e+00  1.33e+03    8.74e+01      0   Skip BFGS  
  11  9.60e+01  4.10e+02  4.13e+00  8.07e+02    6.20e+01      0   Skip BFGS  
Convergence with smooth l2-norm regularization: Start IRLS steps...
L[p qx qy qz]-norm : [0, 1, 1, 1]
eps_p: 0.01 eps_q: 0.01
Regularization decrease: 0.000e+00
  12  2.22e+02  1.69e+02  4.13e+00  1.09e+03    1.54e+02      0   Skip BFGS  
  13  2.22e+02  1.99e+02  2.46e+00  7.45e+02    8.83e+01      0              
  14  2.22e+02  1.31e+02  2.33e+00  6.50e+02    7.37e+01      0              
  15  2.22e+02  1.36e+02  2.10e+00  6.04e+02    5.68e+01      0              
  16  2.22e+02  1.09e+02  2.10e+00  5.76e+02    5.24e+01      0              
  17  2.22e+02  1.13e+02  2.00e+00  5.58e+02    4.22e+01      0              
Regularization decrease: 6.548e-01
  18  8.89e+02  9.80e+01  2.00e+00  1.88e+03    2.78e+02      0              
  19  8.89e+02  2.59e+02  1.18e+00  1.31e+03    1.51e+02      0              
  20  8.89e+02  2.48e+02  1.10e+00  1.22e+03    1.18e+02      0              
  21  8.89e+02  2.77e+02  1.02e+00  1.19e+03    9.36e+01      0              
  22  8.89e+02  2.48e+02  1.03e+00  1.16e+03    9.12e+01      0              
  23  8.89e+02  2.61e+02  9.95e-01  1.15e+03    7.77e+01      0              
Regularization decrease: 4.986e-01
  24  1.48e+03  2.35e+02  9.95e-01  1.71e+03    2.12e+02      0              
  25  1.48e+03  3.70e+02  7.54e-01  1.49e+03    1.16e+02      0              
  26  1.48e+03  3.53e+02  7.32e-01  1.44e+03    1.05e+02      0              
  27  1.48e+03  3.77e+02  6.97e-01  1.41e+03    8.30e+01      0              
  28  1.48e+03  3.39e+02  7.08e-01  1.39e+03    8.89e+01      0              
  29  1.48e+03  3.53e+02  6.87e-01  1.37e+03    7.26e+01      0              
Regularization decrease: 3.063e-01
  30  1.81e+03  3.21e+02  6.87e-01  1.56e+03    1.60e+02      0              
  31  1.81e+03  4.03e+02  5.79e-01  1.45e+03    1.01e+02      0              
  32  1.81e+03  3.72e+02  5.76e-01  1.41e+03    9.19e+01      0              
  33  1.81e+03  3.88e+02  5.55e-01  1.39e+03    7.90e+01      0              
  34  1.81e+03  3.54e+02  5.63e-01  1.37e+03    7.99e+01      0              
  35  1.81e+03  3.62e+02  5.50e-01  1.36e+03    7.06e+01      0              
Regularization decrease: 2.003e-01
  36  2.13e+03  3.34e+02  5.50e-01  1.50e+03    1.37e+02      0              
  37  2.13e+03  3.96e+02  4.87e-01  1.43e+03    9.30e+01      0              
  38  2.13e+03  3.68e+02  4.87e-01  1.40e+03    8.41e+01      0              
  39  2.13e+03  3.81e+02  4.73e-01  1.39e+03    7.56e+01      0              
  40  2.13e+03  3.53e+02  4.79e-01  1.37e+03    7.41e+01      0              
  41  2.13e+03  3.59e+02  4.71e-01  1.36e+03    6.84e+01      0              
Regularization decrease: 1.471e-01
  42  2.48e+03  3.36e+02  4.71e-01  1.50e+03    1.21e+02      0              
  43  2.48e+03  3.86e+02  4.25e-01  1.44e+03    8.27e+01      0              
  44  2.48e+03  3.65e+02  4.25e-01  1.42e+03    7.75e+01      0              
  45  2.48e+03  3.75e+02  4.15e-01  1.40e+03    6.90e+01      0              
  46  2.48e+03  3.53e+02  4.20e-01  1.39e+03    6.93e+01      0              
  47  2.48e+03  3.59e+02  4.14e-01  1.38e+03    6.28e+01      0              
Regularization decrease: 1.225e-01
  48  2.86e+03  3.40e+02  4.14e-01  1.52e+03    1.12e+02      0              
  49  2.86e+03  3.77e+02  3.82e-01  1.47e+03    7.99e+01      0              
  50  2.86e+03  3.62e+02  3.81e-01  1.45e+03    7.95e+01      0              
  51  2.86e+03  3.71e+02  3.74e-01  1.44e+03    6.68e+01      0              
  52  2.86e+03  3.54e+02  3.77e-01  1.43e+03    7.18e+01      0              
  53  2.86e+03  3.60e+02  3.73e-01  1.43e+03    6.09e+01      0              
Regularization decrease: 1.016e-01
  54  3.25e+03  3.45e+02  3.73e-01  1.56e+03    1.05e+02      0              
  55  3.25e+03  3.79e+02  3.50e-01  1.52e+03    8.26e+01      0              
  56  3.25e+03  3.65e+02  3.50e-01  1.50e+03    7.97e+01      0              
  57  3.25e+03  3.75e+02  3.44e-01  1.49e+03    7.06e+01      0              
  58  3.25e+03  3.61e+02  3.46e-01  1.49e+03    7.08e+01      0              
  59  3.25e+03  3.67e+02  3.42e-01  1.48e+03    6.43e+01      0              
Regularization decrease: 8.279e-02
Minimum decrease in regularization. End of IRLS
------------------------- STOP! -------------------------
1 : |fc-fOld| = 0.0000e+00 <= tolF*(1+|f0|) = 1.0964e+03
0 : |xc-x_last| = 2.1686e-01 <= tolX*(1+|x0|) = 1.0567e-01
0 : |proj(x-g)-x|    = 6.4312e+01 <= tolG          = 1.0000e-01
0 : |proj(x-g)-x|    = 6.4312e+01 <= 1e3*eps       = 1.0000e-02
0 : maxIter   =     100    <= iter          =     60
------------------------- DONE! -------------------------

In [61]:
# Get the final model back to full space
m_lp = actvMap*mrec
m_lp[airc] = np.nan

# Get the smooth model also
m_l2 = actvMap*reg.l2model
m_l2[airc] = np.nan

# Save both models to file
Mesh.TensorMesh.writeModelUBC(mesh,'SimPEG_GRAV_l2l2.den',m_l2)
Mesh.TensorMesh.writeModelUBC(mesh,'SimPEG_GRAV_lplq.den',m_lp)

In [62]:
# Plot the recovered models
vmin, vmax = -.3, 0.05

mesh = Mesh.TensorMesh([mesh.hx, mesh.hy, mesh.hz],x0="CCN")

def slide(s,normal):
    
    if normal == "Z":
        fig = plt.figure(figsize(10*1.2, 8))
    else:
        fig = plt.figure(figsize(10*1.2, 4))
        
    ax1 = plt.subplot(2,2,3)
    dat = mesh.plotSlice(m_lp, ax = ax1, normal=normal, ind=s, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
#     plt.colorbar(dat[0])
    plt.gca().set_aspect('equal')
    plt.title('Compact model')
    
    if normal == "Z":
        xlim(-600, 600)
        ylim(-600, 600.)    
    else:
        xlim(-600, 600)
        ylim(-500, 0.) 
        
    ax2 = plt.subplot(2,2,1)
    dat = mesh.plotSlice(m_l2, ax = ax2, normal=normal, ind=s, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
#     plt.colorbar(dat[0])
    plt.gca().set_aspect('equal')
    plt.title('Smooth model')
    
    if normal == "Z":
        xlim(-600, 600)
        ylim(-600, 600.)    
    else:
        xlim(-600, 600)
        ylim(-500, 0.) 
        
    ax2.set_xticklabels([])
        
    ax2 = plt.subplot(1,2,2)
    dat = mesh.plotSlice(m_true, ax = ax2, normal=normal, ind=s, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
#     plt.colorbar(dat[0])
    plt.gca().set_aspect('equal')
    plt.title('True model')
    
    pos =  ax2.get_position()

    ax2.yaxis.set_visible(False)
    if normal == "Z":
        xlim(-600, 600)
        ylim(-600, 600.) 
        ax2.set_position([pos.x0 -0.04 , pos.y0,  pos.width, pos.height])
    else:
        xlim(-600, 600)
        ylim(-500, 0.) 

    pos =  ax2.get_position()
    cbarax = fig.add_axes([pos.x0 + 0.375 , pos.y0 + 0.05,  pos.width*0.1, pos.height*0.75])  # [left, bottom, width, height] of the colorbar axes
    cb = fig.colorbar(dat[0],cax=cbarax, orientation="vertical", ax = ax2, ticks=np.linspace(vmin,vmax, 4))
    cb.set_label("Density (g/cc)",size=12)
    fig.savefig('PF_Compact.png',dpi = 150)
    
interact(slide, s=(0,60), normal=['X','Y','Z'])

# interact(lambda ind: viz(m_l2, ind, normal="Z"), ind=IntSlider(min=0, max=32,step=1, value=28))



In [45]:
# Let's compare the distribution of model parameters and model gradients
plt.figure(figsize=[15,5])
ax = plt.subplot(121)
plt.hist(reg.l2model,100)
plt.plot((eps[0],eps[0]),(0,mesh.nC),'r--')
plt.yscale('log', nonposy='clip')
plt.title('Hist model - l2-norm')
ax = plt.subplot(122)
plt.hist(reg.regmesh.cellDiffxStencil*reg.l2model,100)
plt.plot((eps[1],eps[1]),(0,mesh.nC),'r--')
plt.yscale('log', nonposy='clip')
plt.title('Hist model gradient - l2-norm')

# Let's look at the distribution of model parameters and model gradients
plt.figure(figsize=[15,5])
ax = plt.subplot(121)
plt.hist(mrec,100)
plt.plot((eps[0],eps[0]),(0,mesh.nC),'r--')
plt.yscale('log', nonposy='clip')
plt.title('Hist model - lp-norm')
ax = plt.subplot(122)
plt.hist(reg.regmesh.cellDiffxStencil*mrec,100)
plt.plot((eps[1],eps[1]),(0,mesh.nC),'r--')
plt.yscale('log', nonposy='clip')
plt.title('Hist model gradient - lp-norm')


Out[45]:
<matplotlib.text.Text at 0x2049d5f8>

In [ ]:
Mesh.MeshIO.TensorMeshIO.writeUBC(mesh,'MAG_mesh.msh')

# import pickle
# Results = {"mesh":mesh, "model_true":sus, "model_pred":m_IRLS, "Obs":data, "XYZ": xyz}
# outputs = open("Magresults", 'wb')
# pickle.dump(Results, outputs)
# outputs.close()

In [ ]: